This notebook implements the same analysis as previously, but with bond-length averaging of the results (symmetric stretch only).
29/07/21
TIDY version - see dev version for more notes.
NOTES:
11/05/21
23/04/21
09/04/21
06/04/21 PH
Docs:
Bond scan dev notebook: http://100.86.127.24/jupyter/user/paul/lab/tree/ePS/XeF2_2020/epsman_2021/XeF2_epsman-gamess_tests_310321-incSymTests.ipynb
Previous processing (at equilibrium bond length, all orbs + expt.): https://phockett.github.io/ePSdata/XeF2-preliminary/XeF2_ePS-expt_comp_271020_4d_v111120-dist.html
Bond scan PES from Gamess results (Hartrees vs. Angs.), run for bondLengths = np.arange(1.5,3.1,0.05)

# Environment check - IPython shell magics (notebook context, not plain Python).
!hostname
!conda env list
# Core imports
import sys
import os
from pathlib import Path
import numpy as np
# import epsproc as ep
import xarray as xr
import matplotlib.pyplot as plt
from datetime import datetime as dt
timeString = dt.now()  # Timestamp for this run
# epsproc imports & global plot styling.
import epsproc as ep
# Plotters
from epsproc.plot import hvPlotters
# Multijob class dev code
from epsproc.classes.multiJob import ePSmultiJob
hvPlotters.setPlotters(width = 1000, snsStyle='whitegrid')
# For class, above settings don't take, not sure why, something to do with namespaces/calling sequence?
# Overriding snsStyle does work however... although NOT CONSISTENTLY????
# AH, looks like ordering matters - set_style LAST (.set seems to override)
import seaborn as sns
sns.set(rc={'figure.figsize':(10,6)}) # Set figure size in inches
sns.set_context("paper")
sns.set_style("whitegrid") # Set plot style
sns.set_palette("Paired") # Set colour mapping
# Try direct fig type setting for PDF output figs
# NOTE(review): set_matplotlib_formats is deprecated in newer IPython
# (use matplotlib_inline.backend_inline.set_matplotlib_formats) - confirm env version.
from IPython.display import set_matplotlib_formats
# set_matplotlib_formats('png', 'pdf')
set_matplotlib_formats('svg', 'pdf')
# xr.set_options(display_style='html')
# # Scan for subdirs, based on existing routine in getFiles()
fileBase = Path('/home/paul/ePS/XeF2_2020/XeF2_bondscan') # Data dir on Jake
# fileBase = Path('/home/paul/ePS/XeF2_2020/XeF2_heavy') # Data dir on Jake
# Scan fileBase for ePS job subdirs via the epsproc multijob class.
data = ePSmultiJob(fileBase, verbose = 0)
# Job indices: two orbital sets (offset 32), every 3rd bond length.
# NOTE: `keys` is overridden below (all-orbs case) before scanFiles() is called.
keys = np.r_[np.arange(0,21+1,3), np.arange(32,32+21+1,3)]
keys
# Key-selection history kept for reference; the active value is set below.
# # bondLengths = np.arange(1.5,3.1,0.05)
# # keys = np.arange(0,32,4)
# # keys = np.arange(0,12+1,4) # 1st run, OK to n=12, 2.100A
# # keys = np.arange(2,18+1,4) # 2nd run, OK to n=18, 2.400A
# # keys = np.arange(0,18+1,4) # 2nd run, OK to n=18, 2.400A
# # keys = np.arange(0,21+1,3) # 2nd run, OK to n=18, 2.400A
# keys = np.arange(0,18+1) # For interp testing, load ALL
# # keys = [19,20]
# All orbs case...
# Each orbital set occupies a block of `orbSet` job dirs; load three sets,
# taking every `step`-th bond length within each set.
orbSet = 32
step = 1
keys = np.r_[np.arange(0,18+1,step), np.arange(orbSet,orbSet+18+1,step), np.arange(2*orbSet,2*orbSet+18+1,step)]
# print(keys)
# [print(n, item) for n,item in enumerate(data.jobs['jobDirs']) if n in keys];
# keys = [4,5,6] # Set for 4d data only
# data.scanFiles(keys = keys)
data.scanFiles(keys = keys, outputKeyType = 'dir') # With specified keys
# data.scanFiles(outputKeyType = 'dir') # All keys
data.jobsSummary()
# Relabel each job: orbLabel becomes '<orb chars>-<bond length>' sliced from the
# dir-style key (e.g. 'orb21_A1G-1.500' -> '21-1.5').
for jobKey in data.data.keys():
    data.data[jobKey]['jobNotes']['orbLabel'] = jobKey[3:5] + '-' + jobKey.split('-')[1][0:3]
These are from ePolyScat's getCro function, and are LF (unaligned ensemble) results. This provides a good, if general, overview.
# LF (unaligned ensemble) cross-sections & betas from ePS getCro, per orbital set.
# FIX: Etype was previously used here before its first definition (which only
# appeared further down the notebook) - define it before use for linear runs.
Etype = 'Eke'  # Energy dimension for plots
Erange = None # Reset for full range
orbSubset = [key for key in data.data.keys() if key.startswith('orb21')]
data.plotGetCroComp(pType='SIGMA', Etype=Etype, Erange=Erange, keys = orbSubset)
data.plotGetCroComp(pType='BETA', Etype=Etype, Erange=Erange, keys = orbSubset)
orbSubset = [key for key in data.data.keys() if key.startswith('orb22')]
data.plotGetCroComp(pType='SIGMA', Etype=Etype, Erange=Erange, keys = orbSubset)
data.plotGetCroComp(pType='BETA', Etype=Etype, Erange=Erange, keys = orbSubset)
orbSubset = [key for key in data.data.keys() if key.startswith('orb24')]
data.plotGetCroComp(pType='SIGMA', Etype=Etype, Erange=Erange, keys = orbSubset)
data.plotGetCroComp(pType='BETA', Etype=Etype, Erange=Erange, keys = orbSubset)
# Stack XS data into an xarray Dataset for branching-ratio (BR) processing.
# Bond-scan version (06/05/21); works with xr v0.17 provided the Ehv dim is
# dropped first (keeping it raised alignment errors).
# Dev refs: N2O bond-scan BR notebook; XeF2 ePSdata branching-ratios page.
subset = {}
lText = []
Etype = 'Eke'
ErangeBR = [1, 300] # Energy range for BR plots; set to None to skip

# Expand each job's XS onto (Orb, Bond) dims, keyed by the job key.
for dataKey in data.data.keys():
    if dataKey != 'conv':
        xsTmp = data.data[dataKey]['XS'].sel({'Sym':'All'}).drop('Ehv').squeeze()
        subset[dataKey] = xsTmp.copy().expand_dims({'Orb':[(dataKey.split('-')[0])]}).expand_dims({'Bond':[float(dataKey.split('-')[-1])]})

dsXStemp = xr.Dataset(subset)
# Collapse the per-job 'variable' dim (entries are disjoint on Orb/Bond).
dsXS = dsXStemp.to_array().sum('variable').squeeze()
dsXS['total'] = dsXS.sum('Orb') # Total XS per bond length
dsXS = dsXS/dsXS['total']       # Normalise to branching ratios
# Plot branching ratios: per bond length (rows = Bond), then per orbital (rows = Orb).
cmapOrb = sns.color_palette("dark", dsXS.Orb.size)
sns.set_palette(cmapOrb)
Etype = 'Eke'
Erange = ErangeBR
gauge = 'L'
XCtype = 'SIGMA'
dsXS.sel(**{Etype:slice(Erange[0], Erange[1])}, Type=gauge, XC = XCtype).plot.line(x=Etype, row='Bond');

# Same data, faceted by orbital instead.
cmapBond = sns.color_palette("dark", dsXS.Bond.size)
sns.set_palette(cmapBond)
Etype = 'Eke'
Erange = ErangeBR
gauge = 'L'
XCtype = 'SIGMA'
dsXS.sel(**{Etype:slice(Erange[0], Erange[1])}, Type=gauge, XC = XCtype).plot.line(x=Etype, row='Orb');
xr.__version__
# Restack XS data exactly as above, but WITHOUT normalisation - the raw
# cross-sections are needed for the interpolation & convolution steps below.
subset = {}
lText = []
Etype = 'Eke'
ErangeBR = [1, 300] # Energy range; set to None to skip

for dataKey in data.data.keys():
    if dataKey != 'conv':
        xsTmp = data.data[dataKey]['XS'].sel({'Sym':'All'}).drop('Ehv').squeeze()
        subset[dataKey] = xsTmp.copy().expand_dims({'Orb':[(dataKey.split('-')[0])]}).expand_dims({'Bond':[float(dataKey.split('-')[-1])]})

dsXStemp = xr.Dataset(subset)
# Collapse the per-job 'variable' dim; leave un-normalised this time.
dsXS = dsXStemp.to_array().sum('variable').squeeze()
# Interpolate XS onto finer (Eke, Bond) grids prior to Gaussian convolution.
# Xarray interp docs: http://xarray.pydata.org/en/stable/user-guide/interpolation.html
# (wraps scipy.interpolate.interp1d; 'cubic' method).
interpFlag = True # Run interpolation routine?
if interpFlag:
    # Original calcs span 0.1 - 312.6 eV in 2.5 eV steps; interp to 4x density.
    Erange = [0.1, 312.6]
    EkeInt = np.arange(Erange[0], Erange[1], 2.5/4)
    # Bond interp: heavy case only has 0.1A steps, refine to 0.05A (rounded).
    Brange = [1.5, 2.4]
    BInt = np.around(np.arange(Brange[0], Brange[1], 0.05),3)
    dsInt = dsXS.interp(Eke = EkeInt, Bond = BInt, method = 'cubic')
    # Clamp -ve interpolated cross-sections (unphysical) to a small positive floor.
    # NOTE(review): positional .loc indexing assumes a fixed dim ordering with
    # XC as the 4th dim - confirm if dims change upstream.
    dsInt.loc[:,:,:,'SIGMA',:] = dsInt.sel(XC='SIGMA').where(dsInt.sel(XC='SIGMA')>0, 1e-4)
else:
    dsInt = dsXS.copy()
from scipy.ndimage import gaussian_filter
# Single Gaussian filter over the selected data; sigma ordering must match the
# array dim ordering (here [Orb, Bond, Eke] after selection).
# NOTE: without .copy() the filtered values would be written back into dsInt too.
sel = {'XC':'SIGMA', 'Type':'L'}
daFilt = dsInt.sel(sel).copy()
sigma = [4, 0, 0.8] # In array (grid) units, matching dim order
daFilt[...] = gaussian_filter(daFilt, sigma = sigma)
# Report the widths in real units (Angs, eV) via the grid spacings.
sigmaScaled = [sigma[0]*daFilt.Bond.diff('Bond')[0].values, sigma[2]*daFilt.Eke.diff('Eke')[0].values]
print(sigmaScaled)
# Compare original, interpolated and convolved data, faceted per orbital.
gauge = 'L'
XC = 'SIGMA'
Erange = [0, 30]
dsXS.sel(Eke=slice(Erange[0], Erange[1]), Type=gauge, XC = XC).plot(col = 'Orb')   # Original
dsInt.sel(Eke=slice(Erange[0], Erange[1]), Type=gauge, XC = XC).plot(col = 'Orb')  # Interpolated
daFilt.sel(Eke=slice(Erange[0], Erange[1])).plot(col = 'Orb')                      # Convolved
# Scan Gaussian widths along the Bond dim (sigma in grid units) at fixed Eke
# width, for the SIGMA (cross-section) data. Results stacked on a 'sigma' dim.
sel = {'XC':'SIGMA', 'Type':'L'}
daSet = {}
sEke = 0.8      # Fixed Eke width (grid units)
sigmas = []     # [sVal, sigmaBond(Angs), sigmaEke(eV)] per width - reused below
for sVal in np.arange(0, 16, 2.0):
    sigma = [sVal, 0, sEke]
    daFilt = dsInt.sel(sel).copy()
    daFilt[...] = gaussian_filter(daFilt, sigma = sigma)
    # Record widths in real units via the grid spacings.
    sigmas.append([sVal, sigma[0]*daFilt.Bond.diff('Bond')[0].values, sigma[2]*daFilt.Eke.diff('Eke')[0].values])
    daSet[sVal] = daFilt.copy()

# Restack over the width scan, and label by bond-width in Angs.
daFiltSigma = xr.Dataset(daSet).to_array(dim='sigma')
daFiltSigma['sigmaBond'] = ('sigma', np.array(sigmas)[:,1].round(2))
np.array(sigmas)[:,1].round(2)
# Float Bond selection works from an exact grid value; beware rounding of
# interpolated bond lengths for other values.
daFiltSigma.sel(Bond=1.95, Eke=slice(0,50)).swap_dims({'sigma':'sigmaBond'}).plot.line(x='Eke', row='Orb');
# Repeat the width scan for the BETA data. NOTE: the sigmaBond coordinate
# reuses `sigmas` from the SIGMA scan above (widths identical by construction).
sel = {'XC':'BETA', 'Type':'L'}
daSet = {}
sEke = 0.8
for sVal in np.arange(0, 16, 2.0):
    sigma = [sVal, 0, sEke]
    daFilt = dsInt.sel(sel).copy()
    daFilt[...] = gaussian_filter(daFilt, sigma = sigma)
    # Report widths in real units per step.
    sigmaScaled = [sigma[0]*daFilt.Bond.diff('Bond')[0].values, sigma[2]*daFilt.Eke.diff('Eke')[0].values]
    print(sVal, sigmaScaled)
    daSet[sVal] = daFilt.copy()

# Restack over the width scan.
daFiltBeta = xr.Dataset(daSet).to_array(dim='sigma')
daFiltBeta['sigmaBond'] = ('sigma', np.array(sigmas)[:,1].round(2))
# Select Bond from an existing coordinate value to avoid float-rounding issues.
daFiltBeta.sel(Bond=daFiltSigma.Bond[6], Eke=slice(0,50)).swap_dims({'sigma':'sigmaBond'}).plot.line(x='Eke', row='Orb');
# Restack convolved SIGMA + BETA results and push to the main data structure
# under key 'conv'. (Current plotters assume full ePS-style dims, so plotting
# below uses xarray directly rather than the class plotters.)
data.data['conv'] = {}
data.data['conv']['XS'] = xr.concat([daFiltSigma, daFiltBeta], dim = 'XC')
data.data['conv']['XS']['Ehv'] = ('Eke', 75 + data.data['conv']['XS'].Eke.values) # Approx Ehv values

# Quick checks, per orbital at a fixed bond length.
sel = {'XC':'SIGMA', 'Bond':data.data['conv']['XS'].Bond[6], 'Eke':slice(0,30)}
data.data['conv']['XS'].sel(sel).swap_dims({'sigma':'sigmaBond'}).plot(col='Orb')
sel = {'XC':'BETA', 'Bond':data.data['conv']['XS'].Bond[6], 'Eke':slice(0,30)}
data.data['conv']['XS'].sel(sel).swap_dims({'sigma':'sigmaBond'}).plot(col='Orb')
MODIFIED here to plot convolved bond-scan results.
# Load experimental data: cross-sections, betas, branching ratios & ion yields.
dataPathExpt = Path(r'/home/paul/ePS/XeF2_2020/RF_data_analysis_021020') # On Jake

# File attribs, dict structure mirrors the ePS results layout.
dataFiles = {}
dataFiles['SIGMA'] = {'Data':'XS', 'File':r'XeF2_Xe4d_4d32_4d52_cross_sec_all_photon_energies_02102020.dat',
                      'States':['$4d_{3/2}$', '$4d_{5/2}$']}
dataFiles['BETA'] = {'Data':'Beta', 'File':r'XeF2_Xe4d_beta_all_photon_energies_02102020.dat',
                     'States':['$\Pi_{1/2} (4d_{3/2})$', '$\Delta_{3/2} (4d_{3/2})$', '$\Sigma_{1/2} (4d_{5/2})$',
                               '$\Pi_{3/2} (4d_{5/2})$', '$\Delta_{5/2} (4d_{5/2})$']}
dataFiles['BR-All'] = {'Data':'Branching ratios', 'File':r'XeF2_Xe4d_branching_all_photon_energies_all_states_01112020.dat',
                       'States':['$\Pi_{1/2} (4d_{3/2})$', '$\Delta_{3/2} (4d_{3/2})$', '$\Sigma_{1/2} (4d_{5/2})$',
                                 '$\Pi_{3/2} (4d_{5/2})$', '$\Delta_{5/2} (4d_{5/2})$']}
dataFiles['BR-SO-summed'] = {'Data':'Branching ratios SO summed', 'File':r'XeF2_Xe4d_branching_all_photon_energies_SO_av_01112020.dat',
                             'States':['$\Pi$', '$\Delta$', '$\Sigma$']}

# Ion yield data (1D sets, no State column).
dataFiles2 = {}
dataFiles2['ION-low'] = {'Data':'XS', 'File':r'XeF2_ion_yield_low_energy_cal.txt'}
dataFiles2['ION-high'] = {'Data':'XS', 'File':r'XeF2_ion_yield_high_energy_cal.txt'}

# Read files & convert to Xarrays; column 0 is Ehv, remaining columns are states.
dataList = []
dataList2 = []
for fileKey in dataFiles:
    dataIn = np.loadtxt(dataPathExpt/dataFiles[fileKey]['File'])
    dataXr = xr.DataArray(dataIn[:,1:], dims=('Ehv','State'), coords={'Ehv':dataIn[:,0], 'State':dataFiles[fileKey]['States'][0:dataIn.shape[1]-1]})
    dataXr.attrs = dataFiles[fileKey]
    dataXr.attrs['dataPath'] = dataPathExpt
    dataXr.name = fileKey
    dataList.append(dataXr)

dataExpt = xr.concat(dataList, "XC")
dataExpt['XC'] = list(dataFiles.keys())

for fileKey in dataFiles2:
    dataIn = np.loadtxt(dataPathExpt/dataFiles2[fileKey]['File'])
    dataXr = xr.DataArray(dataIn[:,1].squeeze(), dims=('Ehv'), coords={'Ehv':dataIn[:,0]}) # 1D datasets only here
    dataXr.attrs = dataFiles2[fileKey]
    dataXr.attrs['dataPath'] = dataPathExpt
    dataXr.name = fileKey
    dataList2.append(dataXr)

dataExpt2 = xr.concat(dataList2, "XC")
dataExpt2['XC'] = list(dataFiles2.keys())
# Compare convolved calculations (single orbital, range of Gaussian widths)
# against experimental cross-sections.
marker = 'x'
Etype = 'Ehv'
Erange = [70, 250]
bond = 1.95           # Bond-length subselection for the convolved data
orb = 'orb21_A1G'
pType = 'SIGMA'
selConv = {'XC':pType,'Bond':bond, 'Orb':orb}

# Computational results: one line per convolution width.
pltObj = data.data['conv']['XS'].sel(selConv).swap_dims({'Eke':'Ehv'}).plot.line(x='Ehv');
lText = data.data['conv']['XS'].sel(selConv).sigmaBond.data.tolist()

# Experimental cross-sections, scaled to match the computational results.
refKey = 'conv'
scale = dataExpt.sel({'XC':pType}).max() / data.data[refKey]['XS'].sel(selConv).max()
(dataExpt.sel({'XC':pType})/scale).dropna('State').plot.line(x='Ehv', marker=marker, ls='--');

# Manual legend & axis fixes.
lText.extend(dataExpt.sel({'XC':pType}).dropna('State').coords['State'].data)
plt.legend(lText)
if pType == 'SIGMA':
    plt.ylabel('Cross-section/Mb')
    plt.title('')
else:
    plt.ylabel(r"$\beta_{LM}$")
plt.xlim(Erange);
plt.yscale("log") # Log-scale y for cross-sections
plt.title(f"{orb} data for various $\sigma$ (Angs.), vs. experiment");
# As above, but for BETA parameters (same orbital, bond length & marker/Erange).
pType = 'BETA'
selConv = {'XC':pType,'Bond':bond, 'Orb':orb}

# Computational results: one line per convolution width.
pltObj = data.data['conv']['XS'].sel(selConv).swap_dims({'Eke':'Ehv'}).plot.line(x='Ehv');
lText = data.data['conv']['XS'].sel(selConv).sigmaBond.data.tolist()

# Experimental data, scaled to match the computational results.
refKey = 'conv'
scale = dataExpt.sel({'XC':pType}).max() / data.data[refKey]['XS'].sel(selConv).max()
(dataExpt.sel({'XC':pType})/scale).dropna('State').plot.line(x='Ehv', marker=marker, ls='--');

# Manual legend & axis fixes (linear y for beta).
lText.extend(dataExpt.sel({'XC':pType}).dropna('State').coords['State'].data)
plt.legend(lText)
if pType == 'SIGMA':
    plt.ylabel('Cross-section/Mb')
    plt.title('')
else:
    plt.ylabel(r"$\beta_{LM}$")
plt.xlim(Erange);
plt.title(f"{orb} data for various $\sigma$ (Angs.), vs. experiment");
# All orbitals at a fixed Gaussian width (sigma = 4.0 grid units): cross-sections.
pType = 'SIGMA'
selConv = {'XC':pType,'Bond':bond, 'sigma':4.0}

# Computational results: one line per orbital.
pltObj = data.data['conv']['XS'].sel(selConv).swap_dims({'Eke':'Ehv'}).plot.line(x='Ehv');
lText = data.data['conv']['XS'].sel(selConv).Orb.data.tolist()
sigmaBond = data.data['conv']['XS'].sel(selConv).sigmaBond.data # Width in Angs, for titles

# Experimental data, scaled to match the computational results.
refKey = 'conv'
scale = dataExpt.sel({'XC':pType}).max() / data.data[refKey]['XS'].sel(selConv).max()
(dataExpt.sel({'XC':pType})/scale).dropna('State').plot.line(x='Ehv', marker=marker, ls='--');

# Manual legend & axis fixes.
lText.extend(dataExpt.sel({'XC':pType}).dropna('State').coords['State'].data)
plt.legend(lText)
if pType == 'SIGMA':
    plt.ylabel('Cross-section/Mb')
    plt.title('')
else:
    plt.ylabel(r"$\beta_{LM}$")
plt.xlim(Erange);
plt.title(f"$\sigma$ = {sigmaBond} (Angs.), vs. experiment");
# All orbitals at a fixed Gaussian width (sigma = 4.0 grid units): betas.
# (Reuses `sigmaBond` from the SIGMA cell above for the title.)
pType = 'BETA'
selConv = {'XC':pType,'Bond':bond, 'sigma':4.0}

# Computational results: one line per orbital.
pltObj = data.data['conv']['XS'].sel(selConv).swap_dims({'Eke':'Ehv'}).plot.line(x='Ehv');
lText = data.data['conv']['XS'].sel(selConv).Orb.data.tolist()

# Experimental data, scaled to match the computational results.
refKey = 'conv'
scale = dataExpt.sel({'XC':pType}).max() / data.data[refKey]['XS'].sel(selConv).max()
(dataExpt.sel({'XC':pType})/scale).dropna('State').plot.line(x='Ehv', marker=marker, ls='--');

# Manual legend & axis fixes (linear y for beta).
lText.extend(dataExpt.sel({'XC':pType}).dropna('State').coords['State'].data)
plt.legend(lText)
if pType == 'SIGMA':
    plt.ylabel('Cross-section/Mb')
    plt.title('')
else:
    plt.ylabel(r"$\beta_{LM}$")
plt.xlim(Erange);
plt.title(f"$\sigma$ = {sigmaBond} (Angs.), vs. experiment");
Code adapted from https://phockett.github.io/ePSdata/XeF2-preliminary/xe-xef2_plots-notes_220421.html#Spin-orbit
# Wigner 3j setup for spin-orbit coupling terms.
from epsproc.geomFunc.geomUtils import genllLList
from epsproc.geomFunc.geomCalc import w3jTable
# Gen QNs for specific (L,S) case
L = 2
S = 0.5
Llist = np.array([[L,L+S,S], [L, L, S], [L,L-S,S]]) # Note this needs to be 2D array in current form of function
QNs = genllLList(Llist, uniqueFlag = False)
# Then calc 3js....
backend = 'sympy'
form = 'xdaLM' # '2d' # 'xdaLM' # xds
nonzeroFlag = True
dlist = ['L', 'J', 'S', 'Lambda', 'Omega', 'Sigma'] # Set dims for reference
thrj = w3jTable(QNs = QNs, nonzeroFlag = nonzeroFlag, form = form, dlist = dlist, backend = backend)
# And primed terms (will be identical at this point, but set dims for multiplication later)
thrjP = w3jTable(QNs = QNs, nonzeroFlag = nonzeroFlag, form = form, dlist = [item + 'p' for item in dlist], backend = backend)
# Reformat by consolidating +/- terms as modulation for XS, and square
thrjUS = thrj.unstack('mSet').fillna(0)
# thrjXSmod = 2*(thrjUS.sum('Sigma').where((thrjUS['Lambda']>-0.5)&(thrjUS['Omega']<0.5), drop=True))**2 # Note Lambda & Omega anti-phase!
thrjXSmod = 2*(thrjUS.sum('Sigma').where((thrjUS['Lambda']>-0.5)&(thrjUS['Omega']<0.5), drop=True))**2 # Note Lambda & Omega anti-phase!
thrjXSmod['Omega'] = np.abs(thrjXSmod['Omega'])
# Tabulate for display.
pdTable, _ = ep.multiDimXrToPD(thrjXSmod, colDims = 'Lambda')
pdTable
# SO branching ratios
# 11/11/20 - converted to function
def simulateSOBR(dsXS, thrjSOTerm, BRtype = 'All', coupling = 'Lambda',
                 stateLabels = (r'$\Sigma$', r'$\Pi$', r'$\Delta$')):
    """
    Simulate spin-orbit branching ratios from XS data and 3j coupling terms.

    Parameters
    ----------
    dsXS : xarray Dataset/DataArray
        Cross-section data with a 'channel' dim, relabelled by `coupling`.
    thrjSOTerm : xarray DataArray
        Wigner 3j coupling terms; must provide the `coupling` coordinate.
    BRtype : str, default 'All'
        'All'  : total = sum over all thrjSOTerm dims.
        'J'    : total = sum over ['Lambda', 'Omega'] only.
        'None' : no normalisation (total = 1).
    coupling : str, default 'Lambda'
        Name of the coupling coordinate assigned & swapped onto 'channel'.
    stateLabels : sequence of str
        Labels indexed by int(coupling value) - crude mapping, assumes
        coupling values 0, 1, 2, ... index directly into this sequence.
        (Defaults use raw strings to avoid invalid-escape warnings; was a
        mutable list default previously - now an immutable tuple.)

    Returns
    -------
    xarray object with branching ratios (XS * 3j terms / total), or None if
    BRtype is not recognised.
    """
    # Assign coupling coord for multiplication, then label states by int value.
    dsXSO = dsXS.assign_coords({coupling:('channel',thrjSOTerm[coupling])}).swap_dims({'channel':coupling})
    dsXSO = dsXSO.assign_coords({f'{coupling}-Labels':(coupling, np.asarray(stateLabels)[dsXSO[coupling].astype(int).data])})
    dsXSO = dsXSO * thrjSOTerm

    # Set the normalisation term per BRtype.
    if BRtype == 'All':
        dsXSO['total'] = dsXSO.sum(thrjSOTerm.dims)
    elif BRtype == 'J':
        dsXSO['total'] = dsXSO.sum(['Lambda', 'Omega']) # Sum over channels, except J
    elif BRtype == 'None':
        dsXSO['total'] = 1.0
    else:
        print(f'BRtype={BRtype} not recognised.')
        return

    # Convert to branching ratios.
    dsXSO = dsXSO/dsXSO['total']
    return dsXSO
Label levels by ($\Omega$, $\Lambda$), with no additional summation over terms. Q: is this justified/realistic?
NOTE: seeing good results with Omega coupling scheme for 5/2, but better results for Lambda with 3/2 - different ion state ordering? Other effects going on?
sigmaBond = 0.2 (sigma = 4.0 in grid units) looks reasonable - similar to previous result sets, with some smoothing and clamping of values.
TODO: replot with hvplot for a closer look!
# Display current selection criteria (set in an earlier cell).
selConv

# Set up dsXS as previously (subselect on sigma) - TODO try multiple sigma? Should propagate OK - seeing some quite different results vs. sigma as expected.
# selConv['XC'] = 'SIGMA' # Use same selection criteria as above, or modify
# selConv['sigma'] = 4.0
# selConv.pop('sigma') # Remove sigma for all sigma plotting
pType = 'SIGMA'
selConv = {'XC':pType,'Bond':bond, 'sigma':4.0}  # NOTE(review): `bond` is set in an earlier cell
dsXS = data.data['conv']['XS'].sel(selConv).rename({'Orb':'channel'})
dsXS['total'] = dsXS.sum('channel') # Sum over channels

# Normalise... Q: is this required in this case? Probably!
dsXS = dsXS/dsXS['total']

# Test E-shift... As previously ~10eV shift looks pretty good, for 3/2 case at least.
# dsXS['Ehv'] = dsXS.Ehv + 10.0
dsXS
# Simulate SO branching ratios for J=3/2 (J=1.5) and compare with experiment.
pType = 'BR-All'
Etype = 'Ehv'
coupling = 'Omega' # 'Lambda' # 'Omega'

# Selected term(s) only - LOOKS GOOD FOR coupling='Omega' case, but opposite phases for coupling='Lambda'
# dsXSO = simulateSOBR(dsXS, thrjXSmod, BRtype = 'None', coupling='Omega') # This looks good
dsXSO = simulateSOBR(dsXS, thrjXSmod, BRtype = 'None', coupling=coupling)
# dsXSO = simulateSOBR(dsXS, np.abs(thrjXSmodCoherent), BRtype = 'None', coupling='Lambda') # Testing coherent case

# Unnorm
# dsXSO *= dsXSO['total']

# Selected term(s) only
J=1.5
OLset = [[2,1.5], [1,0.5]] # [Omega, Lambda] pairs to match expt.
# OLset = [[1,0.5],[2,1.5]] # [Omega, Lambda] pairs to match expt.
# OLset = [[1.5,2], [0.5,1]] # Check ordering OK!

# J=2.5
# OLset = [[2,2.5], [1,1.5], [0,0.5]] # [Omega, Lambda] pairs to match expt.

# Subselect each (Omega, Lambda) pair over the plotting energy range.
# subset = xr.DataArray()
subset = []
for OL in OLset:
    # subset.append((dsXSO).swap_dims({'Eke':'Ehv'}).sel(**{Etype:slice(Erange[0], Erange[1])}, Type='L', J=J, Lambda=OL[0], Omega=OL[1]))
    subset.append((dsXSO).swap_dims({'Eke':'Ehv'}).sel(**{Etype:slice(Erange[0], Erange[1])}, J=J, Lambda=OL[0], Omega=OL[1]))
subsetXS = xr.concat(subset, dim='Omega').squeeze()

# Renorm
subsetXS = subsetXS/subsetXS.sum('Omega')

# Plot; facet on sigma if multiple sigma values are present.
if 'sigma' in subsetXS.dims:
    subsetXS.plot.line(x=Etype, row='sigma')
else:
    subsetXS.plot.line(x=Etype)

# Selected states only
if J == 1.5:
    statesPlot = ['$\\Delta_{3/2} (4d_{3/2})$', '$\\Pi_{1/2} (4d_{3/2})$']
else:
    statesPlot = ['$\\Delta_{5/2} (4d_{5/2})$', '$\\Pi_{3/2} (4d_{5/2})$', '$\\Sigma_{1/2} (4d_{5/2})$']
scale = 1
(dataExpt.sel({'XC':pType, 'State':statesPlot})/scale).plot.line(x='Ehv', marker=marker, ls=':');

# Manual legend fix
# lText = list(subsetXS['Omega'].data)
# lText = list(OLset)
lText = [f"{item.split(' ')[0]}$(sim)" for item in statesPlot]
# lText = list(subsetXS['Lambda'].data)
lText.extend(statesPlot)
# lText.extend(dataExpt.sel({'XC':pType}).dropna('State').coords['State'].data)
plt.legend(lText, loc='lower right')
plt.ylabel("Branching ratio")
# plt.title(f'Simulated branching ratios, J={J}, Hund\'s case (c), with subselection (no summation)')
# plt.title("");
plt.title(f"Simulated branching ratios with SO, $\sigma$ = {sigmaBond} (Angs.), vs. experiment");
plt.ylim([0.3, 0.68]); # Set ylims to avoid large peaks

# Set data for export later...
brsoJ1=subsetXS.copy()
Comparison of experimental (dotted lines with markers) and computational (solid lines) branching ratios for $J=3/2$.
# Simulate SO branching ratios for J=5/2 (J=2.5) and compare with experiment.
# Selected term(s) only
# Selected term(s) only - LOOKS GOOD FOR coupling='Omega' case, but opposite phases for coupling='Lambda'
# For J = 5/2 'Lambda' case is possibly better, at least for state ordering? DO NOT UNDERSTAND THIS YET!
# scale = 1
dsXSO = simulateSOBR(dsXS, thrjXSmod, BRtype = 'None', coupling=coupling)

# Unnorm
# dsXSO *= dsXSO['total']

# Selected term(s) only
# J=1.5
# OLset = [[2,1.5], [1,0.5]] # [Omega, Lambda] pairs to match expt.
J=2.5
# OLset = [[2,2.5], [1,1.5], [0,0.5]] # [Omega, Lambda] pairs to match expt.
OLset = [[0,0.5], [1,1.5], [2,2.5]] # [Omega, Lambda] pairs to match expt - REVERSED STATE ORDERING, justified by ion state energy ordering?

# Subselect each (Omega, Lambda) pair over the plotting energy range.
# subset = xr.DataArray()
subset = []
for OL in OLset:
    # subset.append((dsXSO).swap_dims({'Eke':'Ehv'}).sel(**{Etype:slice(Erange[0], Erange[1])}, Type='L', J=J, Lambda=OL[0], Omega=OL[1]))
    subset.append((dsXSO).swap_dims({'Eke':'Ehv'}).sel(**{Etype:slice(Erange[0], Erange[1])}, J=J, Lambda=OL[0], Omega=OL[1]))
subsetXS = xr.concat(subset, dim='Omega').squeeze()

# Renorm
subsetXS = subsetXS/subsetXS.sum('Omega')

# Plot; facet on sigma if multiple sigma values are present.
if 'sigma' in subsetXS.dims:
    subsetXS.plot.line(x=Etype, row='sigma')
else:
    subsetXS.plot.line(x=Etype)

# Selected states only
if J == 1.5:
    statesPlot = ['$\\Delta_{3/2} (4d_{3/2})$', '$\\Pi_{1/2} (4d_{3/2})$']
else:
    statesPlot = ['$\\Delta_{5/2} (4d_{5/2})$', '$\\Pi_{3/2} (4d_{5/2})$', '$\\Sigma_{1/2} (4d_{5/2})$']
scale = 1
(dataExpt.sel({'XC':pType, 'State':statesPlot})/scale).plot.line(x='Ehv', marker=marker, ls=':');

# Manual legend fix
# lText = list(subsetXS['Omega'].data)
# lText = list(OLset)
lText = [f"{item.split(' ')[0]}$(sim)" for item in statesPlot]
# lText = list(subsetXS['Lambda'].data)
lText.extend(statesPlot)
# lText.extend(dataExpt.sel({'XC':pType}).dropna('State').coords['State'].data)
# plt.legend(lText, loc='upper right')
plt.legend(lText, loc='lower right')
plt.ylabel("Branching ratio")
# plt.title(f'Simulated branching ratios, J={J}, Hund\'s case (c), with subselection (no summation)')
# plt.title('');
plt.title(f"Simulated branching ratios with SO, $\sigma$ = {sigmaBond} (Angs.), vs. experiment");
# plt.ylim([0.1, 0.5]); # Set ylims to avoid large peaks

# Set data for export later...
brsoJ2=subsetXS.copy()
30/07/21
Quick and dirty output to CSV files with Pandas.
In all cases, file versions with "-Ehv" appended are converted to the photon energy scale.
Data matches that plotted at https://phockett.github.io/ePSdata/XeF2-preliminary/xe-xef2_plots-notes_220421.html
# Generate Pandas-table versions of the convolved cross-sections (Eke scale).
data.matEtoPD(keys = 'conv', dataType = 'XS', xDim = 'Eke', printTable = False)
# Inspect the generated Pandas table (stored in the DataArray attrs).
data.data['conv']['XS'].attrs['pd']
# Write consolidated matE to file - raw ePS results
dataPath = Path('/home/paul/ePS/XeF2_2020/epsman_2021/xef2_data_processed_300721')
dataType = 'XS'

# Export one CSV per dataset key, for both Eke and Ehv x-axis scales.
for xDim in ['Eke','Ehv']:
    # Generate Pandas versions
    # data.matEtoPD(dataType = dataType, xDim = 'Eke', selDims = {'Type':'L'}, printTable = False)
    data.matEtoPD(dataType = dataType, xDim = xDim, printTable = False)

    for key in data.data.keys():
        fileName = Path(dataPath, f"XeF2_{key}_{dataType}_300721-{xDim}.csv")
        data.data[key][dataType].pd.T.to_csv(fileName, sep = '\t')  # Tab-separated output
# Subselected convolution dataset as plotted above.
# FIX(review): the original wrote data.data[key][...] here, reusing the stale
# `key` loop variable left over from the previous export cell — i.e. it wrote
# whichever dataset key iterated last, not the 'conv' set actually selected
# and converted by matEtoPD below. Now indexes 'conv' explicitly.
selDims = {'Bond':1.95, 'sigma':4}

for xDim in ['Eke','Ehv']:
    # Generate Pandas versions for the 'conv' dataset only.
    data.matEtoPD(keys = 'conv', selDims = selDims, dataType = dataType, xDim = xDim, printTable = False)
    fileName = Path(dataPath, f"XeF2_conv_{selDims['Bond']}_{selDims['sigma']}_{dataType}_300721-{xDim}.csv")
    data.data['conv'][dataType].pd.T.to_csv(fileName, sep = '\t')  # Tab-separated output
# Branching ratios with SO
Jout=1.5
dataOut = brsoJ1
# Jout=2.5
# dataOut = brsoJ2

# Export on the Eke scale (swap dims back from Ehv first).
# fileName = f"XeF2_Branching-SO-J{Jout}_070521.csv"
fileName = Path(dataPath, f"XeF2_Branching-SO-J{Jout}_conv_{selDims['Bond']}_{selDims['sigma']}_300721-Eke.csv")
pdOut, _ = ep.multiDimXrToPD(dataOut.swap_dims({'Ehv':'Eke'}), colDims='Eke')
pdOut.T.to_csv(fileName, sep = '\t')

# Export on the Ehv (photon energy) scale.
fileName = Path(dataPath, f"XeF2_Branching-SO-J{Jout}_conv_{selDims['Bond']}_{selDims['sigma']}_300721-Ehv.csv")
pdOut, _ = ep.multiDimXrToPD(dataOut, colDims='Ehv')
pdOut.T.to_csv(fileName, sep = '\t')
# Environment/version report for reproducibility.
import scooby
scooby.Report(additional=['epsman', 'epsproc', 'xarray', 'jupyter'])

# Check current Git commit for local ePSproc version
# (IPython shell magics - not plain Python).
!git -C {Path(ep.__file__).parent} branch
!git -C {Path(ep.__file__).parent} log --format="%H" -n 1

# Check current remote commits
!git ls-remote --heads git://github.com/phockett/ePSproc
# !git ls-remote --heads git://github.com/phockett/epsman